break;
case PAL_PTCE_INFO:
{
- ia64_ptce_info_t ptce;
- status = ia64_get_ptce(&ptce);
- if (status != 0) break;
- r9 = ptce.base;
- r10 = (ptce.count[0]<<32)|(ptce.count[1]&0xffffffffL);
- r11 = (ptce.stride[0]<<32)|(ptce.stride[1]&0xffffffffL);
+ // Return hard-coded Xen-specific values: ptc.e is emulated on Xen
+ // to always flush everything, so these values make the architected
+ // purge loop execute only one ptc.e instruction.
+ status = 0; r9 = 0; r10 = (1L << 32) | 1L; r11 = 0;
}
break;
case PAL_VERSION:
status = ia64_pal_cache_summary(&r9,&r10);
break;
case PAL_VM_SUMMARY:
- status = ia64_pal_vm_summary(&r9,&r10);
+ // FIXME: what should Xen return for these? Figure out later.
+ // For now, Linux does the right thing if the PAL call fails.
+ // In particular, rid_size must be set properly!
+ //status = ia64_pal_vm_summary(&r9,&r10);
break;
case PAL_RSE_INFO:
status = ia64_pal_rse_info(&r9,&r10);
#define FAST_ACCESS_REFLECT
#define FAST_RFI
#define FAST_SSM_I
+#define FAST_PTC_GA
#undef RFI_TO_INTERRUPT // not working yet
#endif
+// FIXME: turn off for now... fix zeroing of regs — should it be bank1?
+//#define HANDLE_AR_UNAT
+
// FIXME: This is defined in include/asm-ia64/hw_irq.h but this
// doesn't appear to be include'able from assembly?
#define IA64_TIMER_VECTOR 0xef
add r24=r24,r23;;
mov cr.iip=r24;;
// OK, now all set to go except for switch to virtual bank0
- mov r30=r2; mov r29=r3;; mov r28=ar.unat;
+ mov r30=r2; mov r29=r3;;
+#ifdef HANDLE_AR_UNAT
+ mov r28=ar.unat;
+#endif
adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
bsw.1;;
.mem.offset 8,0; st8.spill [r3]=r31,16 ;;
movl r31=XSI_IPSR;;
bsw.0 ;;
+ mov r2=r30; mov r3=r29;;
+#ifdef HANDLE_AR_UNAT
// bank0 regs have no NaT bit, so ensure they are NaT clean
mov r16=r0; mov r17=r0; mov r19=r0;
mov r21=r0; mov r22=r0; mov r23=r0;
mov r24=r0; mov r25=r0; mov r26=r0; mov r27=r0;
- mov r2=r30; mov r3=r29; mov ar.unat=r28;
+ mov ar.unat=r28;
+#endif
adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
st4 [r20]=r0 ;;
fast_tick_reflect_done:
add r20=r20,r23;;
mov cr.iip=r20;;
// OK, now all set to go except for switch to virtual bank0
- mov r30=r2; mov r29=r3;; mov r28=ar.unat;
+ mov r30=r2; mov r29=r3;;
+#ifdef HANDLE_AR_UNAT
+ mov r28=ar.unat;
+#endif
adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
bsw.1;;
.mem.offset 8,0; st8.spill [r3]=r31,16 ;;
movl r31=XSI_IPSR;;
bsw.0 ;;
+ mov r2=r30; mov r3=r29;;
+#ifdef HANDLE_AR_UNAT
// bank0 regs have no NaT bit, so ensure they are NaT clean
mov r16=r0; mov r17=r0; mov r19=r0;
mov r21=r0; mov r22=r0; mov r23=r0;
mov r24=r0; mov r25=r0; mov r26=r0; mov r27=r0;
- mov r2=r30; mov r3=r29; mov ar.unat=r28;
+ mov ar.unat=r28;
+#endif
adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
st4 [r20]=r0 ;;
mov pr=r31,-1 ;;
ENTRY(hyper_ptc_ga)
#ifdef CONFIG_SMP
FIXME: ptc.ga instruction requires spinlock for SMP
+#endif
+#ifndef FAST_PTC_GA
+ br.spnt.few dispatch_break_fault ;;
#endif
// FIXME: validate not flushing Xen addresses
#ifdef FAST_HYPERPRIVOP_CNT
if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
// TODO: do this faster
if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
+ if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
if (imm.up) { ipsr->up = 1; psr.up = 1; }
if (imm.be) {
}
if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
+ if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
#define itir_mask(itir) (~((1UL << itir_ps(itir)) - 1))
unsigned long vhpt_translate_count = 0;
-int in_vcpu_tpa = 0;
IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir)
{
unsigned long vipsr = PSCB(vcpu,ipsr);
unsigned long iip = regs->cr_iip;
unsigned long ipsr = regs->cr_ipsr;
-#if 0
- printk("vcpu_translate: bad address %p, viip=%p, vipsr=%p, iip=%p, ipsr=%p\n", address, viip, vipsr, iip, ipsr);
- if (in_vcpu_tpa) printk("vcpu_translate called from vcpu_tpa\n");
- while(1);
- panic_domain(0,"vcpu_translate: bad address %p\n", address);
-#endif
printk("vcpu_translate: bad address %p, viip=%p, vipsr=%p, iip=%p, ipsr=%p continuing\n", address, viip, vipsr, iip, ipsr);
}
/* check 1-entry TLB */
if ((trp = match_dtlb(vcpu,address))) {
dtlb_translate_count++;
-if (!in_vcpu_tpa) printf("vcpu_translate: found in vdtlb\n");
*pteval = trp->page_flags;
*itir = trp->itir;
return IA64_NO_FAULT;
UINT64 pteval, itir, mask;
IA64FAULT fault;
-in_vcpu_tpa=1;
fault = vcpu_translate(vcpu, vadr, 1, &pteval, &itir);
-in_vcpu_tpa=0;
if (fault == IA64_NO_FAULT)
{
mask = itir_mask(itir);
IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
{
+ // Emulate the guest "fc" (flush cache) instruction: translate the
+ // guest virtual address vadr down to a machine physical address,
+ // then flush that cache line through Xen's identity (__va) mapping.
// TODO: Only allowed for current vcpu
- UINT64 mpaddr, ps;
+ UINT64 mpaddr, paddr;
IA64FAULT fault;
- TR_ENTRY *trp;
- unsigned long lookup_domain_mpa(struct domain *,unsigned long);
- unsigned long pteval, dom_imva;
+ unsigned long translate_domain_mpaddr(unsigned long);
+ IA64FAULT vcpu_tpa(VCPU *, UINT64, UINT64 *);
- if ((trp = match_dtlb(vcpu,vadr))) {
- pteval = trp->page_flags;
- dom_imva = __va(pteval & _PFN_MASK);
- ia64_fc(dom_imva);
- return IA64_NO_FAULT;
- }
+ // Guest virtual -> guest metaphysical; a translation fault here is
+ // returned to the caller unchanged (for reflection to the guest).
fault = vcpu_tpa(vcpu, vadr, &mpaddr);
if (fault == IA64_NO_FAULT) {
- struct domain *dom0;
- unsigned long dom0_start, dom0_size;
- if (vcpu == dom0) {
- if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
- printk("vcpu_fc: bad dom0 mpaddr %p!\n",mpaddr);
- }
- }
- pteval = lookup_domain_mpa(vcpu->domain,mpaddr);
- if (pteval) {
- dom_imva = __va(pteval & _PFN_MASK);
- ia64_fc(dom_imva);
- }
- else {
- REGS *regs = vcpu_regs(vcpu);
- printk("vcpu_fc: can't flush vadr=%p, iip=%p\n",
- vadr,regs->cr_iip);
- }
+ // Metaphysical -> machine physical. NOTE(review): the removed code
+ // detected a failed lookup and skipped the flush; presumably
+ // translate_domain_mpaddr now handles invalid mpaddrs itself —
+ // TODO confirm.
+ paddr = translate_domain_mpaddr(mpaddr);
+ ia64_fc(__va(paddr));
}
return fault;
}
+int ptce_count = 0;
IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
{
-
// Note that this only needs to be called once, i.e. the
// architected loop to purge the entire TLB, should use
// base = stride1 = stride2 = 0, count0 = count 1 = 1
- // FIXME: When VHPT is in place, flush that too!
#ifdef VHPT_GLOBAL
vhpt_flush(); // FIXME: This is overdoing it
#endif
// FIXME: validate not flushing Xen addresses
// if (Xen address) return(IA64_ILLOP_FAULT);
// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
+//printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
#ifdef VHPT_GLOBAL
vhpt_flush_address(vadr,addr_range);
#endif